bitkeeper revision 1.862 (407bfad05PmdmZtBnhfJMLp1dqPBYQ)
Author: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 13 Apr 2004 14:36:00 +0000 (14:36 +0000)
Committer: kaf24@scramble.cl.cam.ac.uk <kaf24@scramble.cl.cam.ac.uk>
Tue, 13 Apr 2004 14:36:00 +0000 (14:36 +0000)
Various cleanups. Also fix a bug in Xenolinux timer interrupt.

xen/common/schedule.c
xen/include/xen/event.h
xenolinux-2.4.25-sparse/arch/xen/kernel/entry.S
xenolinux-2.4.25-sparse/arch/xen/kernel/time.c
xenolinux-2.4.25-sparse/include/asm-xen/system.h

index 2655b08c285eb0c55daa305559da527934ad8473..dfacb65bd0a38ce11acdf7070015f9696d8bd831 100644 (file)
@@ -558,8 +558,6 @@ static void t_timer_fn(unsigned long unused)
 
     TRACE_0D(TRC_SCHED_T_TIMER_FN);
 
-    TRACE_0D(TRC_SCHED_T_TIMER_FN);
-
     if ( !is_idle_task(p) )
         send_guest_virq(p, VIRQ_TIMER);
 
index 542cd3c6ef342e33d076396626e5fe9c9940b619..320a265a4cb8e683b14a30630a865f9fca34b16e 100644 (file)
@@ -56,7 +56,7 @@ static inline void evtchn_set_pending(struct task_struct *p, int port)
          !test_and_set_bit(port>>5, &s->evtchn_pending_sel) )
     {
         /* The VCPU pending flag must be set /after/ update to evtchn-pend. */
-        p->shared_info->vcpu_data[0].evtchn_upcall_pending = 1;
+        s->vcpu_data[0].evtchn_upcall_pending = 1;
         guest_schedule_to_run(p);
     }
 }
index b78c74fd9c15140e38e17e9304e2e64139dc70c2..305bd42c702ee1682bb073e9f91cd01defe917bb 100644 (file)
@@ -15,7 +15,7 @@
  * I changed all the .align's to 4 (16 byte alignment), as that's faster
  * on a 486.
  *
- * Stack layout in 'ret_from_system_call':
+ * Stack layout in 'ret_to_user':
  *     ptrace needs to have all regs on the stack.
  *     if the order here is changed, it needs to be
  *     updated in fork.c:copy_process, signal.c:do_signal,
@@ -157,7 +157,7 @@ ENTRY(lcall7)
        call *%edx
        addl $4, %esp
        popl %eax
-       jmp ret_from_sys_call
+       jmp ret_to_user
 
 ENTRY(lcall27)
        pushfl                  # We get a different stack layout with call
@@ -181,7 +181,7 @@ ENTRY(lcall27)
        call *%edx
        addl $4, %esp
        popl %eax
-       jmp ret_from_sys_call
+       jmp ret_to_user
 
 ENTRY(ret_from_fork)
        pushl %ebx
@@ -190,7 +190,7 @@ ENTRY(ret_from_fork)
        GET_CURRENT(%ebx)
        testb $0x02,tsk_ptrace(%ebx)    # PT_TRACESYS
        jne tracesys_exit
-       jmp     ret_from_sys_call
+       jmp ret_to_user
 
 /*
  * Return to user mode is not as complex as all this looks,
@@ -208,15 +208,15 @@ ENTRY(system_call)
        jae badsys
        call *SYMBOL_NAME(sys_call_table)(,%eax,4)
        movl %eax,EAX(%esp)             # save the return value
-ENTRY(ret_from_sys_call)
+ret_to_user:
         movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
         movb $1,evtchn_upcall_mask(%esi) # make tests atomic
-ret_syscall_tests:
+ret_to_user_nocli:
        cmpl $0,need_resched(%ebx)
-       jne reschedule
+       jne  reschedule
        cmpl $0,sigpending(%ebx)
        je   safesti                    # ensure need_resched updates are seen
-signal_return:
+/*signal_return:*/
        movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
        movl %esp,%eax
        xorl %edx,%edx
@@ -238,10 +238,10 @@ tracesys:
        movl %eax,EAX(%esp)             # save the return value
 tracesys_exit:
        call SYMBOL_NAME(syscall_trace)
-       jmp ret_from_sys_call
+       jmp ret_to_user
 badsys:
        movl $-ENOSYS,EAX(%esp)
-       jmp ret_from_sys_call
+       jmp ret_to_user
 
        ALIGN
 ENTRY(ret_from_intr)
@@ -249,14 +249,14 @@ ENTRY(ret_from_intr)
 ret_from_exception:
        movb CS(%esp),%al
        testl $2,%eax
-       jne ret_from_sys_call
+       jne ret_to_user
        jmp restore_all
 
        ALIGN
 reschedule:
         movb $0,evtchn_upcall_mask(%esi)  # reenable event callbacks
        call SYMBOL_NAME(schedule)        # test
-       jmp  ret_from_sys_call
+       jmp  ret_to_user
 
 ENTRY(divide_error)
        pushl $0                # no error code
@@ -316,7 +316,7 @@ ENTRY(hypervisor_callback)
         movl SYMBOL_NAME(HYPERVISOR_shared_info),%esi
         movb CS(%esp),%cl
        test $2,%cl          # slow return to ring 2 or 3
-       jne  ret_syscall_tests
+       jne  ret_to_user_nocli
 safesti:movb $0,evtchn_upcall_mask(%esi) # reenable event callbacks
 scrit:  /**** START OF CRITICAL REGION ****/
         testb $0xFF,evtchn_upcall_pending(%esi)
index 3674aef2ef2a241a20155529104ff5abd3358b0a..52920cd0fc9569f40ef24ddcd99c4443e4aad207 100644 (file)
@@ -492,8 +492,7 @@ static inline void do_timer_interrupt(int irq, void *dev_id,
 static void timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
     write_lock(&xtime_lock);
-    while ( !TIME_VALUES_UP_TO_DATE )
-        do_timer_interrupt(irq, NULL, regs);
+    do_timer_interrupt(irq, NULL, regs);
     write_unlock(&xtime_lock);
 }
 
index 40e11d3e689bcd20cb8d97959b5e3b574cff9e9d..6a8e352ac55f3bfd965a7217abf1069bcfefe32e 100644 (file)
@@ -351,9 +351,13 @@ do {                                                                          \
 
 #define __save_and_sti(x)                                                     \
 do {                                                                          \
+    shared_info_t *_shared = HYPERVISOR_shared_info;                          \
     barrier();                                                                \
-    (x) = HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask;            \
-    HYPERVISOR_shared_info->vcpu_data[0].evtchn_upcall_mask = 0;              \
+    (x) = _shared->vcpu_data[0].evtchn_upcall_mask;                           \
+    _shared->vcpu_data[0].evtchn_upcall_mask = 0;                             \
+    barrier(); /* unmask then check (avoid races) */                          \
+    if ( unlikely(_shared->vcpu_data[0].evtchn_upcall_pending) )              \
+        evtchn_do_upcall(NULL);                                               \
 } while (0)
 
 #define local_irq_save(x)       __save_and_cli(x)